#include <asm/vmx_vcpu.h>
#include <asm/vmx_vpd.h>
#include <asm/pal.h>
+#include <asm/vhpt.h>
#include <public/hvm/ioreq.h>
#define CONFIG_DOMAIN0_CONTIGUOUS
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
-extern unsigned long vhpt_paddr, vhpt_pend;
if (!mm->pgd) {
printk("assign_new_domain_page: domain pgd must exist!\n");
printf("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
return(p);
}
-if (unlikely(page_to_maddr(p) > vhpt_paddr && page_to_maddr(p) < vhpt_pend)) {
- printf("assign_new_domain_page: reassigned vhpt page %p!!\n",page_to_maddr(p));
-}
+ if (unlikely(page_to_maddr(p) > __get_cpu_var(vhpt_paddr)
+ && page_to_maddr(p) < __get_cpu_var(vhpt_pend))) {
+ printf("assign_new_domain_page: reassigned vhpt page %p!!\n",
+ page_to_maddr(p));
+ }
set_pte(pte, pfn_pte(page_to_maddr(p) >> PAGE_SHIFT,
__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
}
unsigned long rreg = REGION_NUMBER(rr);
ia64_rr rrv, newrrv, memrrv;
unsigned long newrid;
- extern unsigned long vhpt_paddr;
if (val == -1) return 1;
newrrv.rid = newrid;
newrrv.ve = 1; // VHPT now enabled for region 7!!
newrrv.ps = PAGE_SHIFT;
- if (rreg == 0) v->arch.metaphysical_saved_rr0 =
- vmMangleRID(newrrv.rrval);
- if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
- v->arch.privregs, vhpt_paddr, pal_vaddr);
+ if (rreg == 0)
+ v->arch.metaphysical_saved_rr0 = vmMangleRID(newrrv.rrval);
+ else if (rreg == 7)
+ ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
+ v->arch.privregs, __get_cpu_var(vhpt_paddr),
+ pal_vaddr);
else set_rr(rr,newrrv.rrval);
#endif
return 1;
#include <asm/dma.h>
#include <asm/vhpt.h>
-unsigned long vhpt_paddr, vhpt_pend, vhpt_pte;
+DEFINE_PER_CPU (unsigned long, vhpt_paddr);
+DEFINE_PER_CPU (unsigned long, vhpt_pend);
+/* NOTE(review): empty stub — presumably the !VHPT_ENABLED build of
+ * vhpt_flush (the #endif just below suggests a conditional block);
+ * confirm against the full source before relying on it being a no-op. */
void vhpt_flush(void)
{
}
#endif
+/* Insert a pinned translation for the VHPT at VHPT_ADDR into TR slot
+ * IA64_TR_VHPT.  The pte is now passed by the caller (vhpt_init) instead
+ * of being read from the former global vhpt_pte, since the VHPT is
+ * allocated per CPU.  Interrupt collection is disabled around the itr
+ * insert, and an instruction serialize follows, per the architected
+ * sequence for TR insertion. */
-void vhpt_map(void)
+static void vhpt_map(unsigned long pte)
{
unsigned long psr;
psr = ia64_clear_ic();
-	ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, vhpt_pte, VHPT_SIZE_LOG2);
+	ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, pte, VHPT_SIZE_LOG2);
ia64_set_psr(psr);
ia64_srlz_i();
}
+/* Allocate and enable this CPU's VHPT.  Records the physical start/end
+ * in the per-CPU vhpt_paddr/vhpt_pend (replacing the old globals), maps
+ * the table at VHPT_ADDR via vhpt_map(), then enables it through the PTA
+ * register.  NOTE(review): presumably invoked once per CPU so that each
+ * CPU populates its own per-CPU slots — confirm against the caller. */
void vhpt_init(void)
{
unsigned long vhpt_total_size, vhpt_alignment;
+	unsigned long paddr, pte;
struct page_info *page;
#if !VHPT_ENABLED
return;
printf("vhpt_init: can't allocate VHPT!\n");
while(1);
}
-	vhpt_paddr = page_to_maddr(page);
-	vhpt_pend = vhpt_paddr + vhpt_total_size - 1;
-	printf("vhpt_init: vhpt paddr=%p, end=%p\n",vhpt_paddr,vhpt_pend);
-	vhpt_pte = pte_val(pfn_pte(vhpt_paddr >> PAGE_SHIFT, PAGE_KERNEL));
-	vhpt_map();
+	/* Record this CPU's VHPT range; pte is handed to vhpt_map() rather
+	 * than stashed in a global, since each CPU maps its own table. */
+	paddr = page_to_maddr(page);
+	__get_cpu_var(vhpt_paddr) = paddr;
+	__get_cpu_var(vhpt_pend) = paddr + vhpt_total_size - 1;
+	/* NOTE(review): %p with unsigned long arguments is a pre-existing
+	 * format mismatch carried over from the removed lines. */
+	printf("vhpt_init: vhpt paddr=%p, end=%p\n",
+	       paddr, __get_cpu_var(vhpt_pend));
+	pte = pte_val(pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL));
+	vhpt_map(pte);
ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
VHPT_ENABLED);
vhpt_flush();
extern void vhpt_insert (unsigned long vadr, unsigned long ptr,
unsigned logps);
extern void vhpt_flush(void);
+
+/* Currently the VHPT is allocated per CPU. */
+DECLARE_PER_CPU (unsigned long, vhpt_paddr);
+DECLARE_PER_CPU (unsigned long, vhpt_pend);
+
#endif /* !__ASSEMBLY */
#if !VHPT_ENABLED